In [1]:
import pylearn2.utils
import pylearn2.config
import theano
import neukrill_net.dense_dataset
import neukrill_net.utils
import numpy as np
%matplotlib inline
import matplotlib.pyplot as plt
import holoviews as hl
%load_ext holoviews.ipython
import sklearn.metrics
In [10]:
# Load the most recent checkpoint of the no-norm-constraints experiment.
model_path = ("/disk/scratch/neuroglycerin/models/"
              "quicker_learning_1_fc_layer_experiment_no_norms_repeat_recent.pkl")
m = pylearn2.utils.serial.load(model_path)
In [3]:
def make_curves(model, *args):
    """Overlay monitoring channels from a pylearn2 model as HoloViews curves.

    Parameters
    ----------
    model : object
        A pylearn2 model exposing `model.monitor.channels`, a dict mapping
        channel names to objects with `example_record` and `val_record`.
    *args : str
        Names of channels to plot, looked up in `model.monitor.channels`.

    Returns
    -------
    A HoloViews overlay of one Curve per requested channel (examples seen
    on the x-axis, channel value on the y-axis), or None if no channel
    names were supplied.
    """
    curves = None
    for name in args:
        channel = model.monitor.channels[name]
        # Capitalise the first letter so the curve group label reads nicely.
        label = name[0].upper() + name[1:]
        curve = hl.Curve(zip(channel.example_record, channel.val_record),
                         group=label)
        # Compare against None explicitly: the original `if not curves:`
        # tested the truthiness of the Curve object itself, which can be
        # falsy for a valid (e.g. empty) element and would then silently
        # restart the overlay instead of extending it.
        if curves is None:
            curves = curve
        else:
            curves += curve
    return curves
In [11]:
nll_channels = [c for c in m.monitor.channels.keys() if 'nll' in c]
Looks like the NLL for most of these channels is still improving, following a fairly linear drop-off. Maybe it will keep improving if we keep training:
In [12]:
make_curves(m,*nll_channels)
Out[12]:
Plotting all the monitoring channels at the same time, we might see something interesting happening:
In [13]:
make_curves(m,*sorted(m.monitor.channels.keys()))
Out[13]:
In [14]:
# Channels reporting mean parameter norms, sorted alphabetically.
sorted_names = sorted(m.monitor.channels)
means = [name for name in sorted_names
         if "mean" in name and "norm" in name]
make_curves(m, *means)
Out[14]:
In [15]:
# Point pylearn2's image viewer at a shell script that writes the plot to
# disk instead of opening a window (headless-friendly workaround).
%env PYLEARN2_VIEWER_COMMAND=/afs/inf.ed.ac.uk/user/s08/s0805516/repos/neukrill-net-work/image_hack.sh
# Render the first-layer weights of the model loaded above.
%run ~/repos/pylearn2/pylearn2/scripts/show_weights.py /disk/scratch/neuroglycerin/models/quicker_learning_1_fc_layer_experiment_no_norms_repeat_recent.pkl
In [16]:
from IPython.display import Image
In [18]:
def plot_recent_pylearn2(
        filename="/afs/inf.ed.ac.uk/user/s08/s0805516/tmp/pylearnplot.png",
        width=700):
    """Display the image most recently written by the pylearn2 viewer hack.

    Parameters
    ----------
    filename : str, optional
        Path to the PNG written by image_hack.sh. Defaults to the original
        hard-coded location, so existing calls are unaffected.
    width : int, optional
        Display width in pixels (default 700, as before).

    Returns
    -------
    IPython.display.Image for rich display in the notebook.
    """
    return Image(filename=filename, width=width)
plot_recent_pylearn2()
Out[18]:
In [ ]:
%run ~/repos/pylearn2/pylearn2/scripts/show_weights.py /disk/scratch/neuroglycerin/models/experiment_biglayer_largedecay_recent.pkl
In [20]:
plot_recent_pylearn2()
Out[20]:
In [ ]: